In [ ]:
import os
from os import path
from astropy.time import Time
from astropy.io import fits, ascii
import astropy.units as u
from astropy.table import Table
from astropy.constants import G

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import h5py
from sqlalchemy import func
from scipy.optimize import root
from scipy.stats import scoreatpercentile
import tqdm

from thejoker import JokerSamples
from thejoker.sampler import JokerParams, TheJoker
from thejoker.plot import plot_rv_curves

from twoface.config import TWOFACE_CACHE_PATH
from twoface.db import (db_connect, AllStar, AllVisit, AllVisitToAllStar, 
                        StarResult, Status, JokerRun, initialize_db)
from twoface.data import APOGEERVData
from twoface.plot import plot_data_orbits, plot_two_panel, plot_phase_fold_residual
from twoface.mass import m2_func
from twoface.samples_analysis import unimodal_P

In [ ]:
# Path to The Joker posterior samples (HDF5) for the 'apogee-jitter' run
samples_file = path.join(TWOFACE_CACHE_PATH, 'apogee-jitter.hdf5')
# Connect to the twoface results database and open a session for all queries below
Session, _ = db_connect(path.join(TWOFACE_CACHE_PATH, 'apogee.sqlite'))
session = Session()

HACK: quick-look summary statistics of the $\ln K$ (velocity semi-amplitude) posteriors


In [ ]:
# All stars with any non-zero result status (i.e. processed by the run)
stars = session.query(AllStar).join(StarResult, JokerRun, Status).filter(Status.id > 0).all()
apogee_ids = np.array([star.apogee_id for star in stars])

In [ ]:
# Summarize each star's K posterior by percentiles of ln(K)
qs = [1, 5, 25, 50, 75, 95, 99]
lnK_perc = np.zeros((len(stars), len(qs)))

with h5py.File(samples_file, 'r') as f:
    for row, star in zip(lnK_perc, stars):
        K_samples = f[star.apogee_id]['K'][:]
        row[:] = np.percentile(np.log(K_samples), qs)

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))

# Histogram the 1st/5th/25th ln(K) percentiles; reuse the first histogram's
# binning so the three distributions are directly comparable.
bins = 'auto'
for col, label in [(0, '1'), (1, '5'), (2, '25')]:
    _, bins, _ = ax.hist(lnK_perc[:, col], bins=bins, alpha=0.4, label=label)
ax.legend(title=r'$\ln K$ percentile', fontsize=16)

# K = 1 km/s reference line
ax.axvline(0., zorder=10, color='k', alpha=0.5, linestyle='--')

ax.set_yscale('log')
ax.set_xlabel(r'$\ln\left(\frac{K}{{\rm km}\,{\rm s}^{-1}}\right)$')
ax.xaxis.set_ticks(np.arange(-10, 8+1, 2));

Period dist. stats


In [ ]:
# Restrict to stars with result status id 4 (see the Status table for the
# meaning of each status code)
stars = session.query(AllStar).join(StarResult, JokerRun, Status).filter(Status.id == 4).all()
apogee_ids = np.array([star.apogee_id for star in stars])

In [ ]:
%%time
unimodals = []
kurtosis = []
skewness = []
with h5py.File(samples_file, 'r') as f:
    for star in stars:
        samples = JokerSamples.from_hdf5(f[star.apogee_id])
        P_uni = unimodal_P(samples, star.apogeervdata())
        unimodals.append(P_uni)
        
        lnP = np.log(samples['P'].value)
        
        # ~kurtosis
        pers = np.percentile(lnP, [2.5, 97.5, 16, 84])
        diffs = pers[1::2] - pers[::2]
        kurtosis.append(diffs[1] / diffs[0])
        
        # ~skewness
        Q1, Q2, Q3 = np.percentile(lnP, [25, 50, 75])
        skewness.append( (Q3+Q1-2*Q2) / (Q3-Q1) )

unimodals = np.array(unimodals)
kurtosis = np.array(kurtosis)
skewness = np.array(skewness)

In [ ]:
# number of stars flagged as having a unimodal period posterior
unimodals.sum()

In [ ]:
# Shape statistics of the lnP posteriors; the lone marker at (0.5, 0) is the
# reference point targeted by the selection in the next cell.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.scatter(kurtosis, skewness, marker='.', linewidth=0, alpha=0.2, s=3)
ax.scatter(0.5, 0.)

In [ ]:
# Select stars whose lnP posterior looks ~Gaussian: width ratio near 0.5
# and quartile skewness near 0.
# BUG FIX: the original read `np.abs(skewness < 0.01)`, which takes the
# absolute value of a *boolean* array (so it thresholded signed skewness,
# not |skewness|). The parenthesis belongs around `skewness` alone.
test = (np.abs(kurtosis - 0.5) < 0.01) & (np.abs(skewness) < 0.01)
test.sum()

In [ ]:
# Plot the first few stars passing the Gaussian-lnP cut.
# FIX: open the HDF5 file explicitly read-only -- h5py's historical default
# mode ('a') can lock or even create the file; we only read here.
with h5py.File(samples_file, 'r') as f:
    for apogee_id in apogee_ids[test][:5]:
        star = AllStar.get_apogee_id(session, apogee_id)
        data = star.apogeervdata()
        samples = JokerSamples.from_hdf5(f[apogee_id])
        fig = plot_two_panel(data, samples, plot_data_orbits_kw=dict(xlim_choice='tight'))

        # title = the same width-ratio statistic computed above
        lnP = np.log(samples['P'].value)
        pers = np.percentile(lnP, [2.5, 97.5, 16, 84])
        diffs = pers[1::2] - pers[::2]
        fig.axes[0].set_title("{:.3f}".format(diffs[1]/diffs[0]))

In [ ]:
# Same plots for the unimodal-period stars (from index 20 on).
# FIX: open the HDF5 file explicitly read-only (h5py's historical default
# mode 'a' can lock/create the file; we only read here).
with h5py.File(samples_file, 'r') as f:
    for apogee_id in apogee_ids[unimodals][20:]:
        star = AllStar.get_apogee_id(session, apogee_id)
        data = star.apogeervdata()
        samples = JokerSamples.from_hdf5(f[apogee_id])
        fig = plot_two_panel(data, samples, plot_data_orbits_kw=dict(xlim_choice='tight'))

        lnP = np.log(samples['P'].value)
        pers = np.percentile(lnP, [2.5, 97.5, 16, 84])
        diffs = pers[1::2] - pers[::2]
        fig.axes[0].set_title("{:.3f}".format(diffs[1]/diffs[0]))

Compute acceleration metrics


In [ ]:
%%time

all_acc = None
all_apogee_ids = []
with h5py.File(samples_file) as f:
    for i, key in enumerate(f):
        if all_acc is None:
            all_acc = np.full((len(f), n_samples), np.nan)
            K_unit = u.Unit(f[key]['K'].attrs['unit'])
            P_unit = u.Unit(f[key]['P'].attrs['unit'])
        
        _n = len(f[key]['K'][:])
        all_acc[i, :_n] = f[key]['K'][:] / f[key]['P'][:]
        all_apogee_ids.append(key)
        
all_acc = all_acc * K_unit / P_unit
all_apogee_ids = np.array(all_apogee_ids)

In [ ]:
# 10th percentile of K/P per star (nanpercentile: rows are NaN-padded).
# NOTE(review): all_acc carries astropy units here; whether nanpercentile
# preserves the unit depends on the numpy/astropy versions -- verify.
acc_per = np.nanpercentile(all_acc, 10, axis=1)

In [ ]:
fig, ax = plt.subplots(1, 1)

# Distribution of the 10th-percentile K/P values, log-log
log_bins = np.logspace(-6, 2, 64)
ax.hist(acc_per, bins=log_bins);
ax.set_xscale('log')
ax.set_yscale('log')

ax.set_xlabel('$K / P$ [{0:latex_inline}]'.format(K_unit/P_unit))
ax.set_ylabel('$N$')
ax.set_title('10th percentile acc.')

In [ ]:
# Number of stars whose 10th-percentile K/P exceeds each threshold x
[(x, (acc_per > x).sum()) for x in 10**np.arange(-3, 1+0.1, 0.5)]

Stars with constraining data:


In [ ]:
# Stars from the 'apogee-jitter' run, split by follow-up action implied by
# their status code:
# needs more samples
stars1 = session.query(AllStar).join(StarResult, JokerRun, Status)\
                .filter(JokerRun.name == 'apogee-jitter')\
                .filter(Status.id == 1).all()

# needs mcmc
stars2 = session.query(AllStar).join(StarResult, JokerRun, Status)\
                .filter(JokerRun.name == 'apogee-jitter')\
                .filter(Status.id == 2).all()

# HACK: only stars that pass Marie's cuts
# stars2 = session.query(AllStar).join(StarResult, JokerRun, Status)\
#                 .filter(JokerRun.name == 'apogee-jitter')\
#                 .filter((AllStar.logg < 3.5) & (AllStar.logg > -9999))\
#                 .filter(Status.id == 2)\
#                 .filter(AllStar.martig_filter).all() # only look at RGB for now
            
len(stars1), len(stars2)

In [ ]:
# Save diagnostic plots for every star that needs MCMC follow-up.
os.makedirs('../plots/needs-mcmc', exist_ok=True)

for star in stars2:
    # read-only open: we only load samples
    with h5py.File(samples_file, 'r') as f:
        samples = JokerSamples.from_hdf5(f[star.apogee_id])
    data = star.apogeervdata()

    fig = plot_two_panel(data, samples)
    fig.savefig('../plots/needs-mcmc/{0}-2panel.png'.format(star.apogee_id), dpi=200)

    # BUG FIX: `plot_phase_fold` is not defined or imported anywhere in this
    # notebook; the top-of-file import provides `plot_phase_fold_residual`,
    # which matches the '-residual.png' filename below.
    fig = plot_phase_fold_residual(data, samples[0])
    fig.savefig('../plots/needs-mcmc/{0}-residual.png'.format(star.apogee_id), dpi=200)

    # free figure memory inside the loop
    plt.close('all')

In [ ]:
# Distribution of Martig et al. primary-mass estimates for the full
# martig_filter sample (independent of Joker result status)
all_m1 = u.Quantity([x.martig_mass for x in session.query(AllStar).filter(AllStar.martig_filter).all()])
plt.hist(all_m1.value, bins='auto');

Stars with large acceleration


In [ ]:
# Stars with a significant 10th-percentile acceleration that are also in the
# follow-up lists (status 1 or 2)
followup_ids = sorted({x.apogee_id for x in stars1 + stars2})
mask = (acc_per > 1e-2) & np.isin(all_apogee_ids, followup_ids)
sub_apogee_ids = all_apogee_ids[mask]
mask.sum()

In [ ]:
# Quick look at one of the high-acceleration stars
aid = sub_apogee_ids[10]
star = session.query(AllStar).filter(AllStar.apogee_id == aid).limit(1).one()

with h5py.File(samples_file, 'r') as f:
    samples = JokerSamples.from_hdf5(f[star.apogee_id])

# BUG FIX: `make_two_panel` is not defined anywhere in this notebook;
# `plot_two_panel(data, samples)` is the imported equivalent used elsewhere.
data = star.apogeervdata()
_ = plot_two_panel(data, samples)

In [ ]:
%%time 

mean_Ps = []
mean_es = []
loggs = []
used_apogee_ids = []
with h5py.File(samples_file) as f:
    for aid in sub_apogee_ids:
        samples = JokerSamples.from_hdf5(f[aid])
        star = session.query(AllStar).filter(AllStar.apogee_id == aid).limit(1).one()
        data = star.apogeervdata()
        if unimodal_P(samples, data):
            mean_Ps.append(np.mean(samples['P']))
            mean_es.append(np.mean(samples['e']))
            loggs.append(star.logg)
            used_apogee_ids.append(aid)
            
mean_Ps = u.Quantity(mean_Ps)
mean_es = u.Quantity(mean_es)
loggs = np.array(loggs)
used_apogee_ids = np.array(used_apogee_ids)

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(7.2, 6))

# Period vs. eccentricity, colored by logg, for giants (logg > 1.5)
giants = loggs > 1.5
c = ax.scatter(mean_Ps[giants], mean_es[giants], c=loggs[giants],
               marker='o', alpha=0.85, linewidth=1, s=28,
               edgecolor='#666666', cmap='viridis_r', vmin=1.5, vmax=3.5)
ax.set_xscale('log')
cb = fig.colorbar(c)
cb.set_label(r'$\log g$')

ax.set_xlim(0.8, 500)
ax.set_ylim(-0.05, 1.)

ax.set_xlabel(r'mean $P$ [day]')
ax.set_ylabel(r'eccentricity $e$')
fig.savefig('../plots/P_e_prelim_logg.png', dpi=256)

In [ ]:
# Pick one short-period star on the lower RGB
aid = used_apogee_ids[(loggs > 1.5) & (loggs < 2.5) & (mean_Ps < 10*u.day)][2]
star = session.query(AllStar).filter(AllStar.apogee_id == aid).limit(1).one()
print(2 * (0.015 ** (1/3.*star.logg)))

apogee_id = star.apogee_id

with h5py.File(samples_file, 'r') as f:
    samples = JokerSamples.from_hdf5(f[apogee_id])

# BUG FIX: `make_two_panel` is not defined anywhere in this notebook;
# use the imported `plot_two_panel(data, samples)` as elsewhere.
data = star.apogeervdata()
_ = plot_two_panel(data, samples)


In [ ]:
# Minimum companion mass for each "needs MCMC" star, from the first sample's
# binary mass function.
m2_mins = []
for star in stars2:
    with h5py.File(samples_file, mode='r') as f:
        samples = JokerSamples.from_hdf5(f[star.apogee_id])

    orbit = samples.get_orbit(0)
    # NOTE(review): `func` is not defined anywhere in this notebook -- this
    # cell raises NameError as written. It is presumably meant to be
    # `m2_func` (imported from twoface.mass), but m2_func is called elsewhere
    # with args=(m1, sini, mf) whereas only two extra args are passed here --
    # confirm the intended signature before fixing.
    res = root(func, x0=100., args=(orbit.elements.m_f.value, .5))
    m2_mins.append(res.x[0])

In [ ]:
# Build an astropy Table with one row per "needs MCMC" star: the first Joker
# sample's orbital parameters plus stellar metadata (SNR, logg, [Fe/H], Nvisits).
all_pars = None
for star in stars2:
    data = star.apogeervdata(clean=True)
    
    with h5py.File(samples_file, mode='r') as f:
        samples = JokerSamples.from_hdf5(f[star.apogee_id])
    
    # NOTE(review): `orbit` is never used below -- leftover from an earlier
    # version of this cell?
    orbit = samples.get_orbit(0)
    
    if all_pars is None:
        # First star: create the table with one-element columns
        all_pars = Table()
        
        for key in samples.keys():
            all_pars[key] = samples[key][:1]
            
        all_pars['snr'] = [star.snr]
        all_pars['logg'] = [star.logg]
        all_pars['fe_h'] = [star.fe_h]
        all_pars['fe_h_err'] = [star.fe_h_err]
        all_pars['nvisits'] = [len(data)]
        
    else:
        # Subsequent stars: append a row.
        # NOTE(review): add_row() with single-element *lists* as values and
        # dict(samples[:1]) both depend on JokerSamples/Table behavior --
        # verify this actually appends scalars, not list-valued cells.
        row = dict(samples[:1])
        row['snr'] = [star.snr]
        row['logg'] = [star.logg]
        row['fe_h'] = [star.fe_h]
        row['fe_h_err'] = [star.fe_h_err]
        row['nvisits'] = [len(data)]
        all_pars.add_row(row)

In [ ]:
# Attach the minimum companion masses computed above
all_pars['m2_min'] = m2_mins * u.Msun
# NOTE(review): `chisqs` is never defined in this notebook -- this line raises
# NameError. It was presumably computed in a since-deleted cell (per-star
# chi^2 of the best-fit orbit); restore that computation before running.
all_pars['chi2'] = chisqs

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(12, 5))

# Period vs. eccentricity colored by fit chi^2
sc = ax.scatter(all_pars['P'], all_pars['e'],
                c=all_pars['chi2'], marker='.',
                vmin=1, vmax=50, cmap='Greys_r')
ax.set_xscale('log')

cb = fig.colorbar(sc)
cb.set_label(r'$\chi^2$')

ax.set_xlabel('$P$ [{0:latex_inline}]'.format(all_pars['P'].unit))
ax.set_ylabel('$e$')

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(8, 5))

# High-SNR stars only: period vs. eccentricity colored by number of visits
high_snr = all_pars[all_pars['snr'] > 100]
sc = ax.scatter(high_snr['P'], high_snr['e'], marker='.', cmap='magma_r',
                c=high_snr['nvisits'], vmin=3, vmax=20)
ax.set_xscale('log')
ax.set_xlim(1, 2000)
ax.set_xlabel('$P$ [{0:latex_inline}]'.format(all_pars['P'].unit))
ax.set_ylabel('$e$')
ax.set_title('SNR > 100')

cb = fig.colorbar(sc)
cb.set_label('$N$ visits')

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(6, 5))

# High-SNR stars with valid [Fe/H] (APOGEE fills missing values with -9999)
good = (all_pars['snr'] > 100) & (all_pars['fe_h'] > -999)
feh_sample = all_pars[good]

print(len(feh_sample))

ax.errorbar(feh_sample['P'], feh_sample['fe_h'], yerr=feh_sample['fe_h_err'],
            linestyle='none', marker='.', color='k')

ax.set_xscale('log')
ax.set_xlim(1, 2000)

# ax.set_xlabel('[Fe/H]')
# ax.set_ylabel(r'$M_{2, {\rm min}}$ ' + '[{0:latex_inline}]'.format(u.Msun))

# ax.set_title('log$g$ < 3.25, $\chi^2$ < 30')
fig.tight_layout()

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(6, 5))

# Giant-branch surface gravity, valid metallicity, and a decent orbit fit.
# NB: `sub` is reused by the next cell -- keep the name.
keep = ((all_pars['logg'] < 3.25) &
        (all_pars['logg'] > -999) &
        (all_pars['fe_h'] > -999) &
        (all_pars['chi2'] < 30))
sub = all_pars[keep]

print(len(sub))

ax.errorbar(sub['fe_h'], sub['m2_min'], xerr=sub['fe_h_err'],
            linestyle='none', marker='.', color='k')
ax.set_yscale('log')

ax.set_ylabel(r'$M_{2, {\rm min}}$ ' + '[{0:latex_inline}]'.format(u.Msun))
ax.set_xlabel('[Fe/H]')

ax.set_title('log$g$ < 3.25, $\chi^2$ < 30')
fig.tight_layout()

In [ ]:
fig, ax = plt.subplots(1, 1, figsize=(6, 5))

# chi^2 vs. minimum companion mass (uses `sub` from the previous cell)
pts = ax.scatter(sub['chi2'], sub['m2_min'], c=sub['nvisits'],
                 vmin=3, vmax=20, marker='.')
ax.set_yscale('log')
ax.set_ylim(1E-3, 1e2)
ax.set_xlim(0, 50)

ax.set_xlabel(r'$\chi^2$')
ax.set_ylabel(r'$M_{2, {\rm min}}$ ' + '[{0:latex_inline}]'.format(u.Msun))

cb = fig.colorbar(pts)
cb.set_label('$N$ visits')

In [ ]:
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))

# Histograms of the phase and argument-of-pericenter angles with Poisson
# (sqrt(N)) error bars.
n, bins, _ = axes[0].hist(all_pars['M0'], bins='auto');
binc = (bins[:-1]+bins[1:])/2.
axes[0].errorbar(binc, n, np.sqrt(n), marker='', linestyle='none')
axes[0].set_xlabel('$M_0$ [rad]')

n, bins, _ = axes[1].hist(all_pars['omega'], bins='auto');
binc = (bins[:-1]+bins[1:])/2.
axes[1].errorbar(binc, n, np.sqrt(n), marker='', linestyle='none')
# FIX: raw string -- '\o' is an invalid escape sequence (SyntaxWarning in
# modern Python); the rendered label is unchanged.
axes[1].set_xlabel(r'$\omega$ [rad]')

axes[1].axvline(np.pi)

In [ ]:
from scipy.stats import beta

In [ ]:
sub = all_pars[all_pars['snr'] > 100]
bins = np.linspace(0, 1, 13)

fig, axes = plt.subplots(1, 2, figsize=(10, 5), sharex=True)

# Eccentricity distributions for short- vs. long-period systems, compared to
# the Kipping (2013)-style Beta prior below.
mask = sub['P'] < 20
# FIX: `normed` was deprecated and then removed from matplotlib (>=3.1);
# `density=True` is the drop-in replacement.
axes[0].hist(sub[mask]['e'], bins=bins, density=True, alpha=0.8);
axes[0].set_title(r'$P < 20\,{{\rm d}}$ ({0} stars)'.format(mask.sum()))

axes[1].hist(sub[~mask]['e'], bins=bins, density=True, alpha=0.8);
axes[1].set_title(r'$P > 20\,{{\rm d}}$ ({0} stars)'.format(np.logical_not(mask).sum()))

ecc = np.linspace(0, 1, 100)
for ax in axes:
    ax.plot(ecc, beta.pdf(ecc, 0.867, 3.03), marker='', label='prior')
    ax.set_xlabel('eccentricity, $e$')
    
fig.tight_layout()

In [ ]:


Estimate primary mass using Martig et al. fits

From FPARAM, do cuts:

  • M_H > -0.8
  • 4000 < TEFF < 5000
  • 1.8 < LOGG < 3.3
  • -0.25 < C_M < 0.15
  • -0.1 < N_M < 0.45
  • -0.1 < CN_M < 0.15
  • -0.6 < C_N < 0.2

FPARAM:

  • 0 - Teff
  • 1 - logg
  • 2 - vmicro
  • 3 - [M/H]
  • 4 - [C/M]
  • 5 - [N/M]

Now all in twoface/mass.py


In [ ]:
# Processed stars from the 'apogee-jitter' run that pass the Martig et al.
# sample cuts, de-duplicated on apstar_id
stars = session.query(AllStar).join(StarResult, JokerRun)\
               .group_by(AllStar.apstar_id)\
               .filter(JokerRun.name == 'apogee-jitter')\
               .filter(StarResult.status_id > 0)\
               .filter(AllStar.martig_filter).all()
len(stars)

In [ ]:
def get_m2_samples(star, samples, n_mass_samples=128):
    """Monte-Carlo samples of the companion mass M2 for one star.

    For every Joker posterior sample, draw `n_mass_samples` primary masses
    (Gaussian around the star's Martig mass, sigma = 0.25 Msun) and isotropic
    inclinations, then numerically invert the binary mass function for M2.

    Returns an (n_joker_samples, n_mass_samples) Quantity in Msun; samples
    whose root solve fails are recorded as 1e-10 Msun (and "Failed" printed).
    """

    # Estimate secondary mass
    Merr = 0.25 # from Martig et al.
    m1s = np.random.normal(star.martig_mass, Merr, size=n_mass_samples) * u.Msun
    # isotropic orientation: cos(i) uniform -> sin(i) samples
    sini = np.sin(np.arccos(1 - np.random.uniform(size=n_mass_samples)))
    # NOTE(review): `mf` is not defined or imported anywhere in this notebook,
    # and other cells index eccentricity as samples['e'], not samples['ecc'] --
    # confirm both before running.
    mfs = mf(samples['P'], samples['K'], samples['ecc']).to(u.Msun).value

    m2s = []
    for k in range(len(mfs)):
        for i in range(n_mass_samples):
            res = root(m2_func, 0.1, args=(m1s.value[i], sini[i], mfs[k]))
            if not res.success:
                print("Failed")
                m2s.append(1E-10)
                continue

            m2s.append(res.x[0])
    m2s = m2s*u.Msun
    m2s = m2s.reshape(len(samples), n_mass_samples)
    return m2s

def get_m2_min_samples(star, samples, n_mass_samples=128):
    """Monte-Carlo samples of the *minimum* companion mass (sin i = 1).

    Same as get_m2_samples but with the inclination fixed edge-on, so the
    solved masses are lower limits. Returns an
    (n_joker_samples, n_mass_samples) Quantity in Msun; failed root solves are
    recorded as 1e-10 Msun and reported once per star at the end.
    """

    # Estimate secondary mass
    Merr = 0.25 # from Martig et al.
    m1s = np.random.normal(star.martig_mass, Merr, size=n_mass_samples) * u.Msun
    # NOTE(review): `mf` is not defined or imported anywhere in this notebook,
    # and other cells index eccentricity as samples['e'], not samples['ecc'] --
    # confirm both before running.
    mfs = mf(samples['P'], samples['K'], samples['ecc']).to(u.Msun).value

    m2s = []
    failure = False
    for k in range(len(mfs)):
        for i in range(n_mass_samples):
            # sin(i) = 1: edge-on orbit gives the minimum M2
            res = root(m2_func, 0.1, args=(m1s.value[i], 1., mfs[k]))
            if not res.success:
                failure = True
                m2s.append(1E-10)
                print(k, m1s.value[i], mfs[k])
                continue

            m2s.append(res.x[0])
    m2s = m2s*u.Msun
    m2s = m2s.reshape(len(samples), n_mass_samples)
    
    if failure:
        print("Star {0}: failed to compute m2 for some samples.".format(star.apogee_id))
    
    return m2s

In [ ]:
def m2_samples_plot(star, ax=None, min=True):
    """Histogram the companion-mass samples for one star.

    Parameters
    ----------
    star : AllStar row (must have a Martig mass estimate).
    ax : matplotlib Axes, optional -- created if not given.
    min : bool -- if True, plot minimum masses (sin i = 1); otherwise
        marginalize over inclination.

    Returns
    -------
    fig, ax
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    
    else:
        fig = ax.figure

    # load posterior samples from The Joker
    # NOTE(review): `load_samples` is not defined or imported in this
    # notebook -- confirm where it comes from.
    samples = load_samples(samples_file, star.apogee_id)
    if min:
        m2s = get_m2_min_samples(star, samples)
        xlabel = r'M_{2, {\rm min}}'
    else:
        m2s = get_m2_samples(star, samples)
        xlabel = r'M_2'
    
    # log-spaced mass bins, 1e-3 to 10 Msun
    bins = np.logspace(-3, 1, 31)
    ax.set_xscale('log')
    
    # FIX: matplotlib removed the `normed` keyword (>=3.1); `density=True`
    # is the drop-in replacement.
    ax.hist(m2s.to(u.Msun).value.ravel(), bins=bins, density=True)
    ax.set_xlabel('${0}$ [{1:latex_inline}]'.format(xlabel, u.Msun))

    return fig, ax

In [ ]:
# Martig-based primary masses for the selected stars
m1s = np.array([s.martig_mass for s in stars])

In [ ]:
# 15th/50th/85th percentiles of K (km/s) for every star
K_percentiles = np.zeros((len(stars), 3))
for i, star in enumerate(tqdm.tqdm(stars)):
    samples = load_samples(samples_file, star.apogee_id)
    K_kms = samples['K'].to(u.km/u.s).value
    K_percentiles[i] = scoreatpercentile(K_kms, [15., 50., 85])

In [ ]:
# 15th/50th/85th percentiles of the minimum companion mass for every star
# (only 8 primary-mass draws per sample to keep this fast)
m2_min_percentiles = np.zeros((len(stars), 3))
for i, star in enumerate(tqdm.tqdm(stars)):
    samples = load_samples(samples_file, star.apogee_id)
    m2s = get_m2_min_samples(star, samples, n_mass_samples=8)
    m2_min_percentiles[i] = scoreatpercentile(m2s.value.ravel(), [15., 50., 85])

In [ ]:
# Where the companion is probably massive (15th percentile above 1 Msun)
big_m2_idx, = np.where(m2_min_percentiles[:, 0] > 1.)
print(len(big_m2_idx))

# Where the companion mass is well-constrained: narrow 15-85 range, a mass
# meaningfully above zero, and a detected velocity amplitude
narrow = (m2_min_percentiles[:, 2] - m2_min_percentiles[:, 0]) < 0.1
nonzero = m2_min_percentiles[:, 0] > 0.01
detected_K = K_percentiles[:, 0] > 0.4
constrained_m2_idx, = np.where(narrow & nonzero & detected_K)
print(len(constrained_m2_idx))

# Most of these are negative primary masses lol
# bigger_m2_idx, = np.where(m2_min_percentiles[:,0] > m1s)
# print(len(bigger_m2_idx))

In [ ]:
# Inspect one star with a well-constrained companion mass
# star = stars[big_m2_idx[0]]
star = stars[constrained_m2_idx[61]]
# star = stars[bigger_m2_idx[3]]

data = star.apogeervdata(clean=True)
samples = load_samples(samples_file, star.apogee_id)

fig, axes = plt.subplots(1, 2, figsize=(10,5))

_ = plot_data_orbits(data, samples, ax=axes[0], xlim_choice='tight')
_ = m2_samples_plot(star, min=True, ax=axes[1])

# Mark the Martig primary mass and its ~0.25 Msun uncertainty band
axes[1].set_xlim(0, 8)
axes[1].axvspan(star.martig_mass - 0.25, star.martig_mass + 0.25,
                color='tab:orange', alpha=0.4, linewidth=0)
axes[1].axvline(star.martig_mass, color='tab:orange', alpha=0.6)

In [ ]:
# x = np.random.uniform(0., 10, size=10000)
# plt.hist(x, bins=np.logspace(-3, 1, 51), log=True, normed=True);
# # n, bins, _ = np.histogram(x, bins=np.logspace(-3, 1, 51), log=True);
# plt.xscale('log')

Make a bunch of plots:


In [ ]:
# Stars whose result status is 1 ("needs more samples", per the earlier cell)
stars = session.query(AllStar)\
               .join(StarResult, JokerRun, Status)\
               .filter(Status.id == 1).all()

In [ ]:
def make_plots(star, save=False):
    """Make (and optionally save) diagnostic plots for one star:
    data+orbits, posterior samples in (P, K) and (e, K), and Monte-Carlo
    companion-mass / pericenter-distance histograms.

    Plots are saved under ../plots/<status-message>/ when save=True.
    Raises NotImplementedError if the star has other than one result row.
    """
    
    if len(star.results) != 1:
        raise NotImplementedError()
        
    # Status message (whitespace -> dashes) names the output directory
    msg = '-'.join(star.results[0].status.message.split())
    plot_path = '../plots/{0}'.format(msg)
    
    if not path.exists(plot_path):
        os.makedirs(plot_path)
        
    run = star.results[0].jokerrun

    # get the RV data for this star; drop points with very large errors
    data = star.apogeervdata()
    data = data[data.stddev < 10*u.km/u.s]

    # load posterior samples from The Joker
    # NOTE(review): `load_samples` and `VelocityTrend1` are not defined or
    # imported anywhere in this notebook -- this function will raise
    # NameError as written. They presumably come from older twoface /
    # thejoker APIs; confirm before running.
    samples_path = path.join(TWOFACE_CACHE_PATH, '{0}.hdf5'.format(run.name))
    samples_dict = load_samples(samples_path, star.apogee_id)

    samples = JokerSamples(trend_cls=VelocityTrend1, **samples_dict)

    # Plot the data with orbits on top
    fig = plot_data_orbits(data, samples, jitter=run.jitter, 
                           xlim_choice='wide', title=star.apogee_id)
    fig.set_tight_layout(True)
    
    if save:
        fig.savefig(path.join(plot_path, '{0}-orbits.png'.format(star.apogee_id)), 
                    dpi=250)

    # fig = plot_data_orbits(data, samples, jitter=run.jitter,
    #                        xlim_choice='tight', title=star.apogee_id)
    # fig.set_tight_layout(True)
    
    # plot samples: P vs K (log-log) and e vs K, sharing the K axis
    fig, axes = plt.subplots(1, 2, figsize=(12, 6), sharey=True)
    
    ax = axes[0]
    ax.scatter(samples['P'].value,
               samples['K'].to(u.km/u.s).value,
               marker='.', color='k', alpha=0.45)

    ax.set_xlabel("$P$ [day]")
    ax.set_ylabel("$K$ [{0:latex_inline}]".format(u.km/u.s))
    ax.set_xscale('log')
    ax.set_yscale('log')
    
    ax.set_xlim(8, 32768)
    ax.set_ylim(1E-3, 1E3)
    
    # plot 2
    ax = axes[1]
    # NOTE(review): other cells index eccentricity as samples['e'],
    # not samples['ecc'] -- confirm which key this JokerSamples version uses.
    ax.scatter(samples['ecc'],
               samples['K'].to(u.km/u.s).value,
               marker='.', color='k', alpha=0.45)
    ax.set_xlabel("$e$")
    ax.set_xlim(0, 1)
    
    if save:
        fig.savefig(path.join(plot_path, '{0}-samples.png'.format(star.apogee_id)), 
                        dpi=250)
    
    # HACK: estimate secondary masses
    # As a total hack, for now, assume 1.25 +/- 0.25 Msun (what the Martig sample looks like)
    n_mass_samples = 128
    m1s = np.random.normal(1.25, 0.25, size=n_mass_samples) * u.Msun
    # isotropic orientation: cos(i) uniform -> sin(i) samples
    sini = np.sin(np.arccos(1 - np.random.uniform(size=n_mass_samples)))
    # NOTE(review): `mf` (binary mass function) is also undefined in this
    # notebook -- see the note above.
    mfs = mf(samples['P'], samples['K'], samples['ecc']).to(u.Msun).value

    # Invert the mass function for M2 for every (sample, mass-draw) pair;
    # failures are recorded as 1e-10 Msun
    m2s = []
    for k in range(len(mfs)):
        for i in range(n_mass_samples):
            res = root(m2_func, 0.1, args=(m1s.value[i], sini[i], mfs[k]))
            if not res.success:
                print("Failed")
                m2s.append(1E-10)
                continue

            m2s.append(res.x[0])
    m2s = m2s*u.Msun
    m2s = m2s.reshape(len(samples), n_mass_samples)
    
    # Choose mass units/binning: Jupiter masses if the typical companion is
    # sub-stellar, solar masses otherwise
    if np.median(m2s) < 0.02*u.Msun:
        M_unit = u.Mjup
        bins = np.logspace(0, 3, 51)
    else:
        M_unit = u.Msun
        bins = np.logspace(-3, 2, 51)
        
    # Now also r_peri^2: companion pericenter distance via Kepler's third law
    a1 = np.cbrt(samples['P'][:,None]**2 / (4*np.pi**2 / (G * (m1s[None]+m2s))))
    r_peri2 = (m1s[None]/m2s) * a1.to(u.au) * (1-samples['ecc'][:,None])
    
    fig,axes = plt.subplots(1, 2, figsize=(12,6))
    
    ax = axes[0]
    ax.hist(m2s.to(M_unit).value.ravel(), bins=bins)
    ax.set_xlabel('$M$ [{0:latex_inline}]'.format(M_unit))
    ax.set_xscale('log')
    
    ax = axes[1]
    ax.hist(r_peri2.to(u.au).value.ravel(), bins=np.logspace(-3, 2, 64))
    ax.axvline((10*u.R_sun).to(u.au).value, 
               color='tab:red', zorder=-10, alpha=0.2) # RC radius
    ax.axvline(1., color='tab:red', zorder=-10, alpha=0.2) # RGB radius
    ax.set_xlabel(r'$r_{\rm peri, 2}$ [AU]')
    ax.set_xscale('log')
    
    fig.tight_layout()
    
    if save:
        fig.savefig(path.join(plot_path, '{0}-m2.png'.format(star.apogee_id)), 
                    dpi=250)

    if save:
        plt.close('all')

In [ ]:
# Reproducible random subset of 16 stars to plot
np.random.seed(42)
for idx in np.random.choice(len(stars), size=16, replace=False):
    print(idx)
    make_plots(stars[idx], save=True)